# Import required packages
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
import cv2
import math
import glob
from moviepy.editor import VideoFileClip
from IPython.display import HTML
%matplotlib inline
# Chessboard dimensions - 9 corners across, 6 corners down
xCorners = 9
yCorners = 6
# Object points for one chessboard in its own plane (z = 0):
# row i is the corner (i % xCorners, i // xCorners, 0), i.e.
# (0,0,0), (1,0,0), ... (8,5,0).
objp = np.zeros((yCorners * xCorners, 3), np.float32)
corner_index = np.arange(yCorners * xCorners)
objp[:, 0] = corner_index % xCorners
objp[:, 1] = corner_index // xCorners
# Accumulators filled by the corner-detection loop below.
objpoints = []  # 3d points in real world space
imgpoints = []  # 2d points in image plane.
# Import all camera calibration images using glob
images = glob.glob('camera_cal/calibration*.jpg')
# Calibration images in which opencv failed to find corners
# (collected so they can be reviewed in a separate figure below)
corners_not_found = []
# One large figure holding a 6x3 grid of successfully-detected boards
plt.figure(figsize=(18, 24))
plt.figtext(0.5,0.9,'Images with all corners detected correctly', fontsize=18, ha='center')
# Walk every calibration frame and look for the 9x6 interior corners.
# Each success contributes one (objp, corners) pair to the calibration
# data; failures are kept aside for visual inspection below.
for idx, fname in enumerate(images):
    img = cv2.imread(fname)
    # Corner detection works on a single-channel image.
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    ret, corners = cv2.findChessboardCorners(gray, (xCorners, yCorners), None)
    if ret == True:
        objpoints.append(objp)
        imgpoints.append(corners)
        # Place this board in the next free cell of the 6x3 grid.
        plt.subplot(6, 3, len(imgpoints))
        # Draw the detected corners onto the frame for the preview.
        cv2.drawChessboardCorners(img, (xCorners, yCorners), corners, ret)
        plt.imshow(img)
        plt.title(fname)
        plt.axis('off')
    else:
        corners_not_found.append(fname)
plt.show()
# Display the calibration images where corner detection failed.
# Fixes:
#  * cv2.imread returns BGR; convert to RGB before handing the array to
#    matplotlib, otherwise red/blue are swapped on screen.
#  * size the subplot row to the actual number of failures so the figure
#    does not crash when more than 3 images fail detection.
plt.figure(figsize=(18, 24))
plt.figtext(.5,.6,'Images with corners not detected correctly', fontsize=18, ha='center')
cols = max(3, len(corners_not_found))
for i, p in enumerate(corners_not_found):
    plt.subplot(1, cols, i+1)
    plt.imshow(cv2.cvtColor(cv2.imread(p), cv2.COLOR_BGR2RGB))
    plt.title(p)
    plt.axis('off')
plt.show()
# Test undistortion on an image
image = cv2.imread('camera_cal/calibration1.jpg')
# cv2.calibrateCamera expects (width, height), but .shape is (height, width, ...)
image_size = (image.shape[1], image.shape[0])
# Use object points and image points from camera calibration.
# mtx is the camera matrix, dist the distortion coefficients; rvecs/tvecs
# are the per-image rotation/translation vectors (unused below).
ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, image_size,None,None)
def undistort(image):
    """Correct lens distortion in `image` using the camera matrix and
    distortion coefficients (globals mtx, dist) computed above."""
    corrected = cv2.undistort(image, mtx, dist, None, mtx)
    return corrected
# Side-by-side comparison of the chessboard before/after undistortion.
# NOTE(review): `image` is BGR from cv2.imread and is shown without
# conversion; acceptable for a near-grayscale chessboard, but colors
# would be swapped for a color photo — confirm intent.
plt.figure(figsize=(20, 10))
plt.subplot(1, 2, 1)
plt.imshow(image)
plt.title("Original Image", fontsize=30)
plt.subplot(1, 2, 2)
plt.imshow(undistort(image))
plt.title("Undistorted Image", fontsize=30)
plt.savefig("output_images/undistortChessboard.jpg")
plt.show()
# Undistort a road test image and compare before/after.
plt.figure(figsize=(20, 10))
image = cv2.imread('test_images/test5.jpg')
plt.subplot(1, 2, 1)
# Convert BGR -> RGB for correct colors under matplotlib
plt.imshow(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
plt.title("Original Image", fontsize=30)
plt.subplot(1, 2, 2)
# Keep the undistorted frame around — it is reused by the thresholding
# demo further down (L118).
undistort_image = undistort(image)
plt.imshow(cv2.cvtColor(undistort_image, cv2.COLOR_BGR2RGB))
plt.title("Undistorted Image", fontsize=30)
plt.savefig("output_images/undistort_test5.jpg")
def gaussianBlur(image, kernel_size):
    """Smooth `image` with a square kernel_size x kernel_size Gaussian
    kernel (sigma derived automatically from the kernel size)."""
    ksize = (kernel_size, kernel_size)
    return cv2.GaussianBlur(image, ksize, 0)
def weightedImage(image, initial_image, α=0.5, β=1., γ=0.):
    """Blend two frames: initial_image * α + image * β + γ."""
    blended = cv2.addWeighted(initial_image, α, image, β, γ)
    return blended
def colorAndGradientThres(image, s_thresh=(170, 255), l_thresh=(190, 255), sx_thresh=(25, 100)):
    """Build a binary lane-pixel mask from a BGR road image.

    Combines three cues, OR-ed together:
      * Sobel-x gradient on the normalized S channel (near-vertical edges)
      * S-channel threshold (yellow lane paint)
      * L-channel threshold (white lane paint)

    Parameters:
        image: BGR image as returned by cv2.imread
        s_thresh, l_thresh, sx_thresh: inclusive (low, high) thresholds

    Returns:
        2-D binary array (same height/width), 1 where any cue fires.

    Fix: each normalization divides by np.max(channel); guard against an
    all-zero channel / flat gradient to avoid a divide-by-zero.
    """
    img = np.copy(image)
    # Convert to HLS: S isolates saturated (yellow) paint, L bright (white) paint.
    hls = cv2.cvtColor(img, cv2.COLOR_BGR2HLS)
    # Stretch the L channel to the full 0-255 range so the fixed
    # thresholds behave consistently across frames of varying brightness.
    l_channel = hls[:,:,1]
    l_max = np.max(l_channel)
    if l_max > 0:
        l_channel = l_channel*(255/l_max)
    # Same normalization for the S channel.
    s_channel = hls[:,:,2]
    s_max = np.max(s_channel)
    if s_max > 0:
        s_channel = s_channel*(255/s_max)
    # Sobel x on the S channel: derivative in x accentuates lines away
    # from horizontal (lane markings are near-vertical in the frame).
    sobelx = cv2.Sobel(s_channel, cv2.CV_64F, 1, 0)
    abs_sobelx = np.absolute(sobelx)
    sobel_max = np.max(abs_sobelx)
    if sobel_max > 0:
        scaled_sobel = np.uint8(255*abs_sobelx/sobel_max)
    else:
        # Perfectly flat channel: no gradient anywhere.
        scaled_sobel = np.zeros(abs_sobelx.shape, dtype=np.uint8)
    # Threshold X gradient
    sxbinary = np.zeros_like(scaled_sobel)
    sxbinary[(scaled_sobel >= sx_thresh[0]) & (scaled_sobel <= sx_thresh[1])] = 1
    # S Channel Threshold for yellow colored lane lines
    s_binary = np.zeros_like(s_channel)
    s_binary[(s_channel >= s_thresh[0]) & (s_channel <= s_thresh[1])] = 1
    # L Channel Threshold for white colored lane lines
    l_binary = np.zeros_like(l_channel)
    l_binary[(l_channel >= l_thresh[0]) & (l_channel <= l_thresh[1])] = 1
    # Union of the three binary masks.
    combined_binary = np.zeros_like(sxbinary)
    combined_binary[(l_binary == 1) | (s_binary == 1) | (sxbinary == 1)] = 1
    return combined_binary
# Display images
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(20,10))
f.subplots_adjust(hspace = .2, wspace=.05)
ax1.imshow(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
ax1.set_title('Original Image', fontsize=20)
colorGradientOut = colorAndGradientThres(undistort_image, s_thresh=(170, 255), l_thresh=(190, 255), sx_thresh=(20, 100))
ax2.imshow(colorGradientOut, cmap='gray')
ax2.set_title('Combined Color and Gradient Thresholded Output', fontsize=20)
# Image Height and Image Width
H,W = colorGradientOut.shape[:2]
print("Height (Y axis vertical): ", H, "Width (X axis horizontal): ", W)
# Define source and destination points for transform
src = np.float32([(450,550), (830,550), (230,700), (1075,700)])
dst = np.float32([(450,0), (W-450,0), (450,H), (W-450,H)])
def perspectiveTransform(image, src, dst):
    """Warp `image` so the quadrilateral `src` maps onto `dst`.

    Returns (warped, Minv): the top-down view of the image, plus the
    inverse matrix mapping warped coordinates back to the original.
    """
    height, width = image.shape[:2]
    # Forward matrix for the warp, inverse for projecting results back.
    forward = cv2.getPerspectiveTransform(src, dst)
    Minv = cv2.getPerspectiveTransform(dst, src)
    # warpPerspective takes the output size as (width, height).
    warped = cv2.warpPerspective(image, forward, (width, height), flags=cv2.INTER_LINEAR)
    return warped, Minv
# Warp the thresholded binary image to a top-down ("bird's-eye") view.
perspectiveTransformOut, Minv = perspectiveTransform(colorGradientOut, src, dst)
# Display the source quadrilateral on the flat image and the destination
# rectangle on the warped image.
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(20,10))
f.subplots_adjust(hspace = .2, wspace=.05)
ax1.imshow(colorGradientOut)
# Trace src in drawing order: top-left, bottom-left, bottom-right,
# top-right, back to start (closes the polygon).
x = [src[0][0],src[2][0],src[3][0],src[1][0],src[0][0]]
y = [src[0][1],src[2][1],src[3][1],src[1][1],src[0][1]]
ax1.plot(x, y, color='red', linewidth=3)
# Flip y so the origin sits at the top-left, matching image coordinates.
ax1.set_ylim([H,0])
ax1.set_xlim([0,W])
ax1.set_title('Thresholded Image', fontsize=30)
ax2.imshow(perspectiveTransformOut)
# Same closed-polygon trace for the dst rectangle.
xt = [dst[0][0],dst[2][0],dst[3][0],dst[1][0],dst[0][0]]
yt = [dst[0][1],dst[2][1],dst[3][1],dst[1][1],dst[0][1]]
ax2.plot(xt, yt, color='red', linewidth=3)
ax2.set_title('Perspective Transformed Image', fontsize=30)
def pipeline(image):
    """Run the full lane-finding preprocessing chain on one frame.

    Steps: undistort -> Gaussian blur -> combined color/gradient
    threshold -> perspective transform to a top-down view.

    Parameters:
        image: road frame (same color order the thresholding expects)

    Returns:
        (warped_binary, Minv): the top-down binary lane mask and the
        inverse perspective matrix for projecting results back.

    Fix: the original built dst from the notebook globals H/W, which were
    captured from a different image; derive the size from the frame being
    processed so the pipeline is self-contained (identical result for the
    project's 1280x720 frames).
    """
    frame_h, frame_w = image.shape[:2]
    # Source trapezoid is hand-tuned pixel coordinates — presumably for
    # 1280x720 frames (see the calibration cell above); TODO confirm for
    # other resolutions.
    src = np.float32([(450,550), (830,550), (230,700), (1075,700)])
    dst = np.float32([(450,0), (frame_w-450,0), (450,frame_h), (frame_w-450,frame_h)])
    # Undistort image using the calibration computed above.
    undistortOut = undistort(image)
    # 5x5 Gaussian blur suppresses sensor noise before gradient thresholding.
    kernel_size = 5
    blurImage = gaussianBlur(undistortOut, kernel_size)
    # Color and gradient thresholding -> binary lane mask.
    colorGradientThresOut = colorAndGradientThres(blurImage, s_thresh=(170, 255), l_thresh=(190, 255) , sx_thresh=(25, 100))
    # Perspective transform to a bird's-eye view.
    perspectiveTransformOut, Minv = perspectiveTransform(colorGradientThresOut, src, dst)
    return perspectiveTransformOut, Minv
# Exercise the pipeline on every test image: one row per image, with the
# original frame on the left and the pipeline's binary output on the right.
images = glob.glob('test_images/*.jpg')
fig, axs = plt.subplots(len(images), 2, figsize=(50, 150))
fig.subplots_adjust(hspace=.2, wspace=.05)
# Flatten the axes grid so cells can be filled with a running index.
axs = axs.ravel()
i = 0
for image in images:
    # Left column: the frame converted BGR -> RGB for display.
    # NOTE(review): the RGB array is also what gets fed to pipeline(),
    # whose thresholding converts with COLOR_BGR2HLS — confirm the
    # intended color order.
    img = cv2.cvtColor(cv2.imread(image), cv2.COLOR_BGR2RGB)
    axs[i].imshow(img)
    axs[i].set_title('Original Image', fontsize=50)
    axs[i].axis('off')
    i += 1
    # Right column: the warped binary mask produced by the pipeline.
    img_bin, Minv = pipeline(img)
    axs[i].imshow(img_bin, cmap='gray')
    axs[i].set_title('Pipeline Output Image', fontsize=50)
    axs[i].axis('off')
    i += 1